{
int i;
#ifdef __i386__ /* Remove when x86_64 VMX is implemented */
- unsigned long vmx_domain;
+#ifdef CONFIG_VMX
extern void save_vmx_execution_context(execution_context_t *);
+#endif
#endif
c->flags = 0;
sizeof(ed->arch.user_ctxt));
#ifdef __i386__
- vmx_domain = ed->arch.arch_vmx.flags;
- if (vmx_domain)
+#ifdef CONFIG_VMX
+ if ( VMX_DOMAIN(ed) )
save_vmx_execution_context(&c->cpu_ctxt);
+#endif
#endif
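/*
 * Editorial sketch, not part of the patch: the VMX_DOMAIN() predicate used
 * in the hunks above is assumed to wrap the raw flag test that the removed
 * lines performed by hand, roughly:
 *
 *     #define VMX_DOMAIN(ed) ((ed)->arch.arch_vmx.flags)
 *
 * i.e. it evaluates nonzero only for an exec domain running under VMX.
 */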
if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) )
struct tss_struct *tss = init_tss + smp_processor_id();
execution_context_t *stack_ec = get_execution_context();
int i;
-#ifdef CONFIG_VMX
- unsigned long vmx_domain = next_p->arch.arch_vmx.flags;
-#endif
__cli();
}
#ifdef CONFIG_VMX
- if ( vmx_domain )
+ if ( VMX_DOMAIN(next_p) )
{
/* Switch page tables. */
write_ptbase(next_p);
#include <xen/sched.h>
#include <asm/current.h>
+#ifdef CONFIG_VMX
+
/* Intercept an I/O request after vm_exit. Returns 0 if not handled, 1 if handled. */
int vmx_io_intercept(ioreq_t *p)
{
}
}
+
+#endif /* CONFIG_VMX */
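/*
 * Hypothetical caller sketch (editorial, not part of the patch; the helper
 * names below are illustrative only): a VM-exit I/O path would consult the
 * intercept first and forward only the requests it does not handle itself.
 */
static void example_handle_ioreq(ioreq_t *p)
{
    if ( vmx_io_intercept(p) )      /* returns 1 if handled inside Xen */
        return;
    forward_to_device_model(p);     /* illustrative stand-in for the device-model path */
}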
void vmx_do_resume(struct exec_domain *d)
{
- if ( d->arch.guest_vtable )
+ if ( test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state) )
__vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
else
- // we haven't switched off the 1:1 pagetable yet...
- __vmwrite(GUEST_CR3, pagetable_val(d->arch.guest_table));
+ // Paging is not yet enabled in the guest: keep the 1:1 physical map in CR3.
+ __vmwrite(GUEST_CR3, pagetable_val(d->arch.phys_table));
__vmwrite(HOST_CR3, pagetable_val(d->arch.monitor_table));
__vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
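/*
 * Editorial summary of the CR3 setup above (not patch code):
 *   - guest paging enabled  -> GUEST_CR3 = shadow_table (shadowed guest page tables)
 *   - guest paging disabled -> GUEST_CR3 = phys_table (the 1:1 map the guest
 *     runs on before it enables paging)
 *   - HOST_CR3 always points at the monitor table Xen itself runs on.
 */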
const char *context;
#ifdef CONFIG_VMX
- if ( current->arch.arch_vmx.flags && (regs->eflags == 0) )
+ if ( VMX_DOMAIN(current) && (regs->eflags == 0) )
{
__vmread(GUEST_EIP, &eip);
__vmread(GUEST_ESP, &esp);
struct domain *d, unsigned long gmfn);
static inline void shadow_invalidate(struct exec_domain *ed) {
- if ( !ed->arch.arch_vmx.flags )
+ if ( !VMX_DOMAIN(ed) )
BUG();
memset(ed->arch.shadow_vtable, 0, PAGE_SIZE);
}
}
}
- if ( ed->arch.arch_vmx.flags )
+ if ( VMX_DOMAIN(ed) )
{
// Why is VMX mode doing this?
shadow_invalidate(ed);
static inline void update_pagetables(struct exec_domain *ed)
{
- if ( unlikely(shadow_mode_enabled(ed->domain)) )
+ struct domain *d = ed->domain;
+
+ if ( unlikely(shadow_mode_enabled(d)) )
{
- shadow_lock(ed->domain);
+ shadow_lock(d);
__update_pagetables(ed);
- shadow_unlock(ed->domain);
+ shadow_unlock(d);
}
- if ( !shadow_mode_external(ed->domain) )
+ if ( !shadow_mode_external(d) )
{
#ifdef __x86_64__
if ( !(ed->arch.flags & TF_kernel_mode) )
ed->arch.monitor_table = ed->arch.guest_table_user;
else
#endif
- if ( shadow_mode_enabled(ed->domain) )
+ if ( shadow_mode_enabled(d) )
ed->arch.monitor_table = ed->arch.shadow_table;
else
ed->arch.monitor_table = ed->arch.guest_table;
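/*
 * Editorial note on the hunk above: it only hoists ed->domain into a local
 * 'd'; the monitor_table selection itself is unchanged:
 *   x86_64 user mode     -> guest_table_user
 *   shadow mode enabled  -> shadow_table
 *   otherwise            -> guest_table
 */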